/* ham.c
* c version of the hamming net
* david leasure
* 9/25/87
*
* this routine implements the hamming classification network
* described by Richard P. Lippmann in IEEE ASSP Magazine, April 1987, p. 9,
* correcting for a presumed bug in the presented routine.
* the bug is the value set for THETA by Lippmann: when THETA is
* N / 2 it so overwhelms the outputs from the lower net that only 0
* activation values are passed up from the threshold function.
* I have chosen to set THETA to 0, epsilon to 1 / (2M), and to put no
* upper limit on the threshold function, so no saturation occurs.
*
* the program is somewhat inefficient because it keeps data storage
* for the maxnet weights (t[k,l] in Lippmann's notation) and for the
* per-iteration outputs output[t][M], but both could be useful in a
* simulator of this network which allowed things to be fiddled with.
* the code could also be improved by not hard-coding the size and
* values of the node matrices, reading them instead from files and/or
* a user interface.
*
* if you improve the code, please send me the diffs
* david e. leasure
* ihnp4!homxc!del or del@homxc.att.com
*/
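/* scoring sketch (derived from the code below, not quoted from Lippmann):
* with w[i][j] = exemplars[j][i] / 2 and inputs in {-1,+1}, an input x at
* Hamming distance d from exemplar j gives
*
*     sum(i) w[i][j] * x[i]  =  ((N - d) - d) / 2  =  N/2 - d
*
* so init_sum(j) returns N/2 - d - THETA.  with Lippmann's THETA = N/2 the
* score is -d <= 0 and f_thresh() zeroes the whole layer (the presumed bug
* noted above); with THETA = 0 the score is N/2 - d, clipped at 0, and the
* closest exemplar starts the maxnet with the largest activation. */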
/* EMACS_MODES: tabstop=4 c */
#include <stdio.h>
#include <stdlib.h> /* exit() */
#include <ctype.h>
/* number of bits per exemplar (7x5) */
#define N 35
/* number of exemplars */
#define M 10
/* presentation grid (rows x cols) */
#define rows 7
#define cols 5
/* define maximum number of iterations before convergence */
#define MAX_ITERATION 20
/* the exemplar training data */
/* should be replaced later with a read routine */
static int exemplars[M][N] = {
/* 0 */
-1,1,1,1,-1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
/* 1 */
-1,-1,-1,-1,1,
-1,-1,-1,1,1,
-1,-1,-1,-1,1,
-1,-1,-1,-1,1,
-1,-1,-1,-1,1,
-1,-1,-1,-1,1,
-1,-1,-1,-1,1,
/* 2 */
-1,1,1,1,-1,
1,-1,-1,-1,1,
-1,-1,-1,-1,1,
-1,-1,-1,1,-1,
-1,-1,1,-1,-1,
-1,1,-1,-1,-1,
1,1,1,1,1,
/* 3 */
1,1,1,1,1,
-1,-1,-1,1,-1,
-1,-1,1,-1,-1,
-1,1,1,1,-1,
-1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
/* 4 */
-1,-1,-1,1,-1,
-1,-1,1,1,-1,
-1,1,-1,1,-1,
1,-1,-1,1,-1,
1,1,1,1,1,
-1,-1,-1,1,-1,
-1,-1,-1,1,-1,
/* 5 */
1,1,1,1,1,
1,-1,-1,-1,-1,
1,1,1,1,-1,
-1,-1,-1,-1,1,
-1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
/* 6 */
-1,-1,1,1,-1,
-1,1,-1,-1,-1,
1,-1,-1,-1,-1,
1,1,1,1,-1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
/* 7 */
1,1,1,1,1,
-1,-1,-1,-1,1,
-1,-1,-1,1,-1,
-1,-1,-1,1,-1,
-1,-1,1,-1,-1,
-1,-1,1,-1,-1,
-1,-1,1,-1,-1,
/* 8 */
-1,1,1,1,-1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,-1,
/* 9 */
-1,1,1,1,-1,
1,-1,-1,-1,1,
1,-1,-1,-1,1,
-1,1,1,1,1,
-1,-1,-1,-1,1,
-1,-1,-1,1,-1,
-1,1,1,-1,-1};
static float w[N][M]; /* weights of lower subnet */
static float maxnet[M][M]; /* weights of maxnet */
/* input pattern */
static int x[N] =
{/* altered 5 with 4 flipped bits */
/* 9 */
-1,-1,1,1,-1,
-1,-1,-1,1,1,
1,-1,1,-1,1,
1,1,-1,1,-1,
-1,-1,-1,1,-1,
-1,-1,1,1,-1,
1,1,1,-1,-1};
/* maxnet output at each iteration t; float, so the fractional
* activations returned by f_thresh() are not truncated */
static float output[MAX_ITERATION][M];
#define THETA 0.0 /* Lippmann's value is N / 2.0; see header comment */
#define EPSILON (1.0 / (2.0 * M)) /* parenthesized so it expands safely */
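/* concrete values, for orientation: with N = 35 and M = 10, THETA is 0 and
* EPSILON is 1/20 = 0.05, the same magnitude as the -1.0 / 20.0 inhibitory
* weight hard-coded in init_upper() below. */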
/* INIT_LOWER
* initialize the weight matrix for the lower network
*/
void init_lower(void)
{
int i, j;
for (i=0; i<N; i++) {
for (j=0; j<M; j++) {
w[i][j] = exemplars[j][i]/2.0;
}
}
}
/* INIT_UPPER
* init the weight matrix for the upper maxnet
*/
void init_upper(void)
{
int k, l;
for (k=0; k<M; k++) {
for (l=0; l<M; l++) {
if (k==l)
maxnet[k][l] = 1.0;
else
maxnet[k][l] = -1.0 / 20.0; /* lateral inhibition, -1/(2M) = -EPSILON */
}
}
}
/* INIT_SUM
* the code to perform the summation of the weight/input value products
* for each output, i.e. (sum(i=0..N-1) w[i][j]*x[i]) - THETA
*/
float init_sum(int j)
{
int i;
float sum;
sum = 0;
for (i=0; i<N; i++) {
sum = sum + (w[i][j] * x[i]);
}
return(sum - THETA);
}
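/* for example (using the scoring sketch near the top of the file): an input
* identical to exemplar j scores 35/2 - 0 = 17.5, and one differing from it
* in 4 bits scores 17.5 - 4 = 13.5, so init_sum() ranks the exemplars by
* Hamming distance from the input. */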
/* CONVERGE_SUM(J,T)
* perform the sum for the maxnet output calculation
* i.e., output[j](t) - epsilon * sum(k<>j where j,k=0..M)output[k](t)
*/
float converge_sum(int j, int t)
{
int k;
float sum; /* float, so the EPSILON * sum term is not truncated */
sum = 0;
for (k=0; k<M; k++)
if (k != j) sum = sum + output[t][k];
sum = output[t][j] - EPSILON * sum;
return(sum);
}
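/* one illustrative pass with made-up values and EPSILON = 0.05: if the only
* positive outputs at time t are 17.5, 13.5, and 10.0, the next values
* before thresholding are
*     17.5 - 0.05 * (13.5 + 10.0) = 16.325
*     13.5 - 0.05 * (17.5 + 10.0) = 12.125
*     10.0 - 0.05 * (17.5 + 13.5) =  8.450
* i.e. each node is inhibited by 5% of the sum of the others, so the leader
* loses the least and pulls further ahead on every pass. */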
/* F_THRESH(ALPHA)
* implement a simple threshold logic nonlinearity
* I have chosen not to give it a saturation cutoff
*/
float f_thresh(float alpha)
{
if (alpha <= 0.0)
return(0.0);
else
return(alpha);
}
/* INIT_UNKNOWN()
* apply the input to the lower net and derive state 0 of the maxnet
*/
void init_unknown(void)
{
int j;
for (j=0; j<M; j++) {
output[0][j] = f_thresh(init_sum(j));
}
}
/* CONVERGE()
* perform up to MAX_ITERATIONs of the convergence function
* answer found when only one of the maxnet outputs is high
* that output indexes the exemplars for the correct pattern
*/
int converge(void)
{
int t, count, j, winner;
count = 2; /* prime the pump */
winner = -1; /* start with "no winner" */
for (t=1; t<MAX_ITERATION && count>1; t++) {
count = 0;
for (j=0; j<M; j++) {
if ((output[t][j] = f_thresh(converge_sum(j,t - 1))) > 0) {
winner = j;
count++;
}
}
}
if (count != 1)
winner = -1; /* error condition not supposed to occur */
return(winner);
}
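/* a note on the dynamics, rewritten from converge_sum() above: if S(t) is
* the sum of all M outputs at pass t, each update is
*     output[t+1][j] = (1 + EPSILON) * output[t][j] - EPSILON * S(t)
* before clipping, so the gap between any two still-positive outputs grows
* by a factor of (1 + EPSILON) per pass while every output is pulled down
* by EPSILON * S(t); the smaller scores hit zero first and, barring an
* exact tie (which falls through to the winner = -1 case), only the
* largest one survives. */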
/* SHOW_EXEMPLAR(EX)
* print the exemplar(ex) using .'s for -1 and *'s for +1
* break the lines so the pattern is correctly seen
*/
void show_exemplar(int ex[])
{
int c, i;
c = 0;
for (i=0; i<N; i++) {
if (ex[i] < 0) {
printf(".");
}
else {
printf("*");
}
if (++c == cols) {
printf("\n");
c = 0;
}
}
printf("\n");
}
/* SHOW_EXEMPLARS()
* show all the exemplars
*/
void show_exemplars(void)
{
int j;
for (j=0; j<M; j++) {
printf("Exemplar %d:\n",j);
show_exemplar(&exemplars[j][0]);
}
printf("\n");
}
int main(void)
{
int winner;
printf("Hamming Neural Net Example\n\n");
/*
show_exemplars();
*/
init_lower();
init_upper();
init_unknown();
winner = converge();
if (winner >= 0) {
printf("The winner is %d.\n", winner);
printf("\nInput pattern:\n");
show_exemplar(&x[0]);
printf("\n\nExemplar:\n");
show_exemplar(&exemplars[winner][0]);
printf("\n");
}
else { /* according to Lippmann, this should never happen */
printf("Something's wrong. No winner.\n");
}
exit(0);
}